for ( ; order <= shadow_max_order(d); ++order )
{
unsigned int n = count;
- const struct shadow_page_info *sp;
+ const struct page_info *sp;
page_list_for_each ( sp, &d->arch.paging.shadow.freelists[order] )
if ( --n == 0 )
* non-Xen mappings in this top-level shadow mfn */
static void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
{
- struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+ struct page_info *sp = mfn_to_page(smfn);
switch ( sp->u.sh.type )
{
case SH_type_l2_32_shadow:
/* Convert smfn to gfn */
unsigned long gfn;
ASSERT(mfn_valid(smfn));
- gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->v.sh.back));
+ gfn = mfn_to_gfn(d, _mfn(mfn_to_page(smfn)->v.sh.back));
__trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
sizeof(gfn), (unsigned char*)&gfn);
}
/* Need a vcpu for calling unpins; for now, since we don't have
 * per-vcpu shadows, any will do */
struct vcpu *v, *v2;
- struct shadow_page_info *sp, *t;
+ struct page_info *sp, *t;
mfn_t smfn;
int i;
perfc_incr(shadow_prealloc_1);
page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
{
- smfn = shadow_page_to_mfn(sp);
+ smfn = page_to_mfn(sp);
/* Unpin this top-level shadow */
trace_shadow_prealloc_unpin(d, smfn);
* this domain's shadows */
static void shadow_blow_tables(struct domain *d)
{
- struct shadow_page_info *sp, *t;
+ struct page_info *sp, *t;
struct vcpu *v = d->vcpu[0];
mfn_t smfn;
int i;
/* Pass one: unpin all pinned pages */
page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
{
- smfn = shadow_page_to_mfn(sp);
+ smfn = page_to_mfn(sp);
sh_unpin(v, smfn);
}
__initcall(shadow_blow_tables_keyhandler_init);
#endif /* !NDEBUG */
-#ifdef __i386__
-# define next_shadow(pg) ((pg)->next_shadow)
-# define set_next_shadow(pg, n) ((void)((pg)->next_shadow = (n)))
-#else
-static inline struct shadow_page_info *
-next_shadow(const struct shadow_page_info *sp)
+static inline struct page_info *
+next_shadow(const struct page_info *sp)
{
- return sp->next_shadow ? mfn_to_shadow_page(_mfn(sp->next_shadow)) : NULL;
+ return sp->next_shadow ? mfn_to_page(_mfn(sp->next_shadow)) : NULL;
}
+
static inline void
-set_next_shadow(struct shadow_page_info *sp, struct shadow_page_info *next)
+set_next_shadow(struct page_info *sp, struct page_info *next)
{
- sp->next_shadow = next ? mfn_x(shadow_page_to_mfn(next)) : 0;
+ sp->next_shadow = next ? mfn_x(page_to_mfn(next)) : 0;
}
-#endif
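
With the switch to struct page_info the pointer-based i386 variant goes away and the hash chain is threaded by MFN unconditionally: next_shadow holds the frame number of the next shadow in the bucket, with 0 terminating the chain (workable on the assumption that MFN 0 never holds a shadow page). A minimal standalone sketch of the same index-encoded list, with an array index standing in for the MFN:

/* Illustrative model only: `pool` stands in for the frame table,
 * the array index for the MFN, and 0 for the end-of-chain sentinel
 * (so slot 0 can never be a list member). */
#include <assert.h>
#include <stddef.h>

struct node { unsigned long next; /* 0 == end of chain */ };

static struct node pool[16];

static struct node *next_node(const struct node *n)
{
    return n->next ? &pool[n->next] : NULL;
}

static void set_next_node(struct node *n, struct node *nxt)
{
    n->next = nxt ? (unsigned long)(nxt - pool) : 0;
}

int main(void)
{
    set_next_node(&pool[3], &pool[7]);       /* 3 -> 7 */
    set_next_node(&pool[7], NULL);           /* 7 ends the chain */
    assert(next_node(&pool[3]) == &pool[7]);
    assert(next_node(&pool[7]) == NULL);
    return 0;
}
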
/* Allocate another shadow's worth of (contiguous, aligned) pages,
* and fill in the type and backpointer fields of their page_infos.
u32 shadow_type,
unsigned long backpointer)
{
- struct shadow_page_info *sp = NULL;
+ struct page_info *sp = NULL;
unsigned int order = shadow_order(shadow_type);
cpumask_t mask;
void *p;
flush_tlb_mask(mask);
}
/* Now safe to clear the page for reuse */
- p = sh_map_domain_page(shadow_page_to_mfn(sp+i));
+ p = sh_map_domain_page(page_to_mfn(sp+i));
ASSERT(p != NULL);
clear_page(p);
sh_unmap_domain_page(p);
set_next_shadow(&sp[i], NULL);
perfc_incr(shadow_alloc_count);
}
- return shadow_page_to_mfn(sp);
+ return page_to_mfn(sp);
}
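
For orientation, a hypothetical call site pairing allocation with a hash insertion (set_shadow_status as it appears elsewhere in this code; the pairing shown here is an assumption for illustration, not part of this patch):

    mfn_t smfn = shadow_alloc(d, SH_type_l1_shadow, mfn_x(gmfn));
    set_shadow_status(v, gmfn, SH_type_l1_shadow, smfn);
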
/* Return some shadow pages to the pool. */
void shadow_free(struct domain *d, mfn_t smfn)
{
- struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+ struct page_info *sp = mfn_to_page(smfn);
u32 shadow_type;
unsigned long order;
unsigned long mask;
for ( ; order < shadow_max_order(d); ++order )
{
mask = 1 << order;
- if ( (mfn_x(shadow_page_to_mfn(sp)) & mask) ) {
+ if ( (mfn_x(page_to_mfn(sp)) & mask) ) {
/* Merge with predecessor block? */
if ( ((sp-mask)->u.sh.type != PGT_none) ||
((sp-mask)->v.free.order != order) )
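
The merge loop above is standard buddy coalescing: a block's buddy at a given order differs from it only in bit `order` of its frame number, so the `mfn & mask` test decides whether the buddy is the predecessor (sp - mask) or the successor, and merging stops as soon as the buddy is not free (u.sh.type != PGT_none) or not of the same order. A minimal sketch of the buddy computation:

/* Illustrative only: the buddy of block `mfn` at `order` is found
 * by toggling bit `order` of the frame number. */
static unsigned long buddy_of(unsigned long mfn, unsigned int order)
{
    return mfn ^ (1UL << order);
}
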
unsigned int pages,
int *preempted)
{
- struct shadow_page_info *sp;
+ struct page_info *sp;
unsigned int lower_bound;
unsigned int j, order = shadow_max_order(d);
if ( d->arch.paging.shadow.total_pages < pages )
{
/* Need to allocate more memory from domheap */
- sp = (struct shadow_page_info *)
+ sp = (struct page_info *)
alloc_domheap_pages(NULL, order, MEMF_node(domain_to_node(d)));
if ( sp == NULL )
{
static void sh_hash_audit_bucket(struct domain *d, int bucket)
/* Audit one bucket of the hash table */
{
- struct shadow_page_info *sp, *x;
+ struct page_info *sp, *x;
if ( !(SHADOW_AUDIT_ENABLE) )
return;
SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
" and not OOS but has typecount %#lx\n",
sp->v.sh.back,
- mfn_x(shadow_page_to_mfn(sp)),
+ mfn_x(page_to_mfn(sp)),
gpg->u.inuse.type_info);
BUG();
}
{
SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
" but has typecount %#lx\n",
- sp->v.sh.back, mfn_x(shadow_page_to_mfn(sp)),
+ sp->v.sh.back, mfn_x(page_to_mfn(sp)),
gpg->u.inuse.type_info);
BUG();
}
* Returns 0 for success, 1 for error. */
static int shadow_hash_alloc(struct domain *d)
{
- struct shadow_page_info **table;
+ struct page_info **table;
ASSERT(shadow_locked_by_me(d));
ASSERT(!d->arch.paging.shadow.hash_table);
- table = xmalloc_array(struct shadow_page_info *, SHADOW_HASH_BUCKETS);
+ table = xmalloc_array(struct page_info *, SHADOW_HASH_BUCKETS);
if ( !table ) return 1;
memset(table, 0,
- SHADOW_HASH_BUCKETS * sizeof (struct shadow_page_info *));
+ SHADOW_HASH_BUCKETS * sizeof (struct page_info *));
d->arch.paging.shadow.hash_table = table;
return 0;
}
* or INVALID_MFN if it doesn't exist */
{
struct domain *d = v->domain;
- struct shadow_page_info *sp, *prev;
+ struct page_info *sp, *prev;
key_t key;
ASSERT(shadow_locked_by_me(d));
{
if ( unlikely(d->arch.paging.shadow.hash_walking != 0) )
/* Can't reorder: someone is walking the hash chains */
- return shadow_page_to_mfn(sp);
+ return page_to_mfn(sp);
else
{
ASSERT(prev);
{
perfc_incr(shadow_hash_lookup_head);
}
- return shadow_page_to_mfn(sp);
+ return page_to_mfn(sp);
}
prev = sp;
sp = next_shadow(sp);
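
When the lookup finds sp mid-chain and no one is walking the buckets, it moves the hit to the front so hot shadows stay cheap to find on later lookups. A sketch of that move-to-front step, assuming prev is sp's predecessor in the chain (identifiers as in this function):

    set_next_shadow(prev, next_shadow(sp));     /* unhook sp */
    set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
    d->arch.paging.shadow.hash_table[key] = sp; /* reinsert at head */

The hash_walking check above exists precisely because this reordering would break a concurrent chain walk.
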
/* Put a mapping (n,t)->smfn into the hash table */
{
struct domain *d = v->domain;
- struct shadow_page_info *sp;
+ struct page_info *sp;
key_t key;
ASSERT(shadow_locked_by_me(d));
sh_hash_audit_bucket(d, key);
/* Insert this shadow at the top of the bucket */
- sp = mfn_to_shadow_page(smfn);
+ sp = mfn_to_page(smfn);
set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
d->arch.paging.shadow.hash_table[key] = sp;
/* Excise the mapping (n,t)->smfn from the hash table */
{
struct domain *d = v->domain;
- struct shadow_page_info *sp, *x;
+ struct page_info *sp, *x;
key_t key;
ASSERT(shadow_locked_by_me(d));
key = sh_hash(n, t);
sh_hash_audit_bucket(d, key);
- sp = mfn_to_shadow_page(smfn);
+ sp = mfn_to_page(smfn);
if ( d->arch.paging.shadow.hash_table[key] == sp )
/* Easy case: we're deleting the head item. */
d->arch.paging.shadow.hash_table[key] = next_shadow(sp);
{
int i, done = 0;
struct domain *d = v->domain;
- struct shadow_page_info *x;
+ struct page_info *x;
/* Say we're here, to stop hash-lookups reordering the chains */
ASSERT(shadow_locked_by_me(d));
{
ASSERT(x->u.sh.type <= 15);
ASSERT(callbacks[x->u.sh.type] != NULL);
- done = callbacks[x->u.sh.type](v, shadow_page_to_mfn(x),
+ done = callbacks[x->u.sh.type](v, page_to_mfn(x),
callback_mfn);
if ( done ) break;
}
void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
{
- struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+ struct page_info *sp = mfn_to_page(smfn);
unsigned int t = sp->u.sh.type;
{
unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
- int shtype = mfn_to_shadow_page(last_smfn)->u.sh.type;
+ int shtype = mfn_to_page(last_smfn)->u.sh.type;
if ( callbacks[shtype] )
callbacks[shtype](v, last_smfn, gmfn);
int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
mfn_t smfn, unsigned long off)
{
- struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+ struct page_info *sp = mfn_to_page(smfn);
ASSERT(mfn_valid(smfn));
ASSERT(mfn_valid(gmfn));
/* Follow this shadow's up-pointer, if it has one, and remove the reference
* found there. Returns 1 if that was the only reference to this shadow */
{
- struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+ struct page_info *sp = mfn_to_page(smfn);
mfn_t pmfn;
void *vaddr;
int rc;
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
{
- struct shadow_page_info *sp = mfn_to_shadow_page(sl1mfn);
+ struct page_info *sp = mfn_to_page(sl1mfn);
mfn_t gl1mfn = _mfn(sp->v.sh.back);
/* If the shadow is a fl1 then the backpointer contains
do { \
int _i; \
shadow_l1e_t *_sp = sh_map_domain_page((_sl1mfn)); \
- ASSERT(mfn_to_shadow_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow \
- || mfn_to_shadow_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow);\
+ ASSERT(mfn_to_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow \
+ || mfn_to_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow);\
for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ ) \
{ \
(_sl1e) = _sp + _i; \
do { \
int _i, _j, __done = 0; \
int _xen = !shadow_mode_external(_dom); \
- ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow);\
+ ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow);\
for ( _j = 0; _j < 4 && !__done; _j++ ) \
{ \
shadow_l2e_t *_sp = sh_map_domain_page(_sl2mfn); \
int _i; \
int _xen = !shadow_mode_external(_dom); \
shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn)); \
- ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
- || mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow);\
+ ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
+ || mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow);\
for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
if ( (!(_xen)) \
- || mfn_to_shadow_page(_sl2mfn)->u.sh.type != SH_type_l2h_pae_shadow\
+ || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_pae_shadow\
|| ((_i + (3 * SHADOW_L2_PAGETABLE_ENTRIES)) \
< (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
{ \
int _i; \
int _xen = !shadow_mode_external(_dom); \
shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn)); \
- ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow ||\
- mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow);\
+ ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow ||\
+ mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow);\
for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
{ \
if ( (!(_xen)) \
|| !is_pv_32on64_domain(_dom) \
- || mfn_to_shadow_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
+ || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
|| (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) ) \
{ \
(_sl2e) = _sp + _i; \
do { \
int _i; \
shadow_l3e_t *_sp = sh_map_domain_page((_sl3mfn)); \
- ASSERT(mfn_to_shadow_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow);\
+ ASSERT(mfn_to_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow);\
for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ ) \
{ \
(_sl3e) = _sp + _i; \
shadow_l4e_t *_sp = sh_map_domain_page((_sl4mfn)); \
int _xen = !shadow_mode_external(_dom); \
int _i; \
- ASSERT(mfn_to_shadow_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);\
+ ASSERT(mfn_to_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);\
for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ ) \
{ \
if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) ) \
&& shadow_type != SH_type_l2h_pae_shadow
&& shadow_type != SH_type_l4_64_shadow )
/* Lower-level shadow, not yet linked from a higher level */
- mfn_to_shadow_page(smfn)->up = 0;
+ mfn_to_page(smfn)->up = 0;
#if GUEST_PAGING_LEVELS == 4
#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
* of them, decide that this isn't an old linux guest, and stop
* pinning l3es. This is not very quick but it doesn't happen
* very often. */
- struct shadow_page_info *sp, *t;
+ struct page_info *sp, *t;
struct vcpu *v2;
int l4count = 0, vcpus = 0;
page_list_for_each(sp, &v->domain->arch.paging.shadow.pinned_shadows)
page_list_for_each_safe(sp, t, &v->domain->arch.paging.shadow.pinned_shadows)
{
if ( sp->u.sh.type == SH_type_l3_64_shadow )
- sh_unpin(v, shadow_page_to_mfn(sp));
+ sh_unpin(v, page_to_mfn(sp));
}
v->domain->arch.paging.shadow.opt_flags &= ~SHOPT_LINUX_L3_TOPLEVEL;
}
void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
{
shadow_l4e_t *sl4e;
- u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
+ u32 t = mfn_to_page(smfn)->u.sh.type;
mfn_t gmfn, sl4mfn;
SHADOW_DEBUG(DESTROY_SHADOW,
ASSERT(t == SH_type_l4_shadow);
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
+ gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
/* Decrement refcounts of all the old entries */
void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
{
shadow_l3e_t *sl3e;
- u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
+ u32 t = mfn_to_page(smfn)->u.sh.type;
mfn_t gmfn, sl3mfn;
SHADOW_DEBUG(DESTROY_SHADOW,
ASSERT(t == SH_type_l3_shadow);
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
+ gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
{
shadow_l2e_t *sl2e;
- u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
+ u32 t = mfn_to_page(smfn)->u.sh.type;
mfn_t gmfn, sl2mfn;
SHADOW_DEBUG(DESTROY_SHADOW,
#endif
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
+ gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
{
struct domain *d = v->domain;
shadow_l1e_t *sl1e;
- u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
+ u32 t = mfn_to_page(smfn)->u.sh.type;
SHADOW_DEBUG(DESTROY_SHADOW,
"%s(%05lx)\n", __func__, mfn_x(smfn));
/* Record that the guest page isn't shadowed any more (in this type) */
if ( t == SH_type_fl1_shadow )
{
- gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->v.sh.back);
+ gfn_t gfn = _gfn(mfn_to_page(smfn)->v.sh.back);
delete_fl1_shadow_status(v, gfn, smfn);
}
else
{
- mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
+ mfn_t gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
}
void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
{
struct domain *d = v->domain;
- ASSERT(mfn_to_shadow_page(mmfn)->u.sh.type == SH_type_monitor_table);
+ ASSERT(mfn_to_page(mmfn)->u.sh.type == SH_type_monitor_table);
#if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
{
#if SHADOW_PAGING_LEVELS == 3
reserved_xen_slot =
- ((mfn_to_shadow_page(sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow) &&
+ ((mfn_to_page(sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow) &&
(shadow_index
>= (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1))));
#else /* SHADOW_PAGING_LEVELS == 2 */
result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
- gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
+ gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
if ( mfn_valid(gl1mfn)
&& mfn_is_out_of_sync(gl1mfn) )
{
* called in the *mode* of the vcpu that unsynced it. Clear? Good. */
int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
{
- struct shadow_page_info *sp;
+ struct page_info *sp;
mfn_t smfn;
smfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
ASSERT(mfn_valid(smfn)); /* Otherwise we would not have been called */
/* Up to l2 */
- sp = mfn_to_shadow_page(smfn);
+ sp = mfn_to_page(smfn);
if ( sp->u.sh.count != 1 || !sp->up )
return 0;
smfn = _mfn(sp->up >> PAGE_SHIFT);
#if (SHADOW_PAGING_LEVELS == 4)
/* up to l3 */
- sp = mfn_to_shadow_page(smfn);
+ sp = mfn_to_page(smfn);
if ( sp->u.sh.count != 1 || !sp->up )
return 0;
smfn = _mfn(sp->up >> PAGE_SHIFT);
ASSERT(mfn_valid(smfn));
/* up to l4 */
- sp = mfn_to_shadow_page(smfn);
+ sp = mfn_to_page(smfn);
if ( sp->u.sh.count != 1
|| sh_type_is_pinnable(v, SH_type_l3_64_shadow) || !sp->up )
return 0;
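
The walk above leans on the up-pointer encoding: when a shadow has a single reference, sp->up holds the machine address of the one shadow entry pointing at it, so the page part is the parent shadow's MFN. A sketch of the decoding (the offset line is an illustration; this function only needs the frame part):

    mfn_t parent_smfn = _mfn(sp->up >> PAGE_SHIFT);  /* parent shadow's MFN */
    unsigned long off = sp->up & (PAGE_SIZE - 1);    /* entry's byte offset */
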
+ shadow_l2_linear_offset(va)),
sizeof(sl2e)) != 0)
|| !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
- || !mfn_valid(gl1mfn = _mfn(mfn_to_shadow_page(
+ || !mfn_valid(gl1mfn = _mfn(mfn_to_page(
shadow_l2e_get_mfn(sl2e))->v.sh.back))
|| unlikely(mfn_is_out_of_sync(gl1mfn)) )
{
// easier than invalidating all of the individual 4K pages).
//
sl1mfn = shadow_l2e_get_mfn(sl2e);
- if ( mfn_to_shadow_page(sl1mfn)->u.sh.type
+ if ( mfn_to_page(sl1mfn)->u.sh.type
== SH_type_fl1_shadow )
{
flush_tlb_local();
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Check to see if the SL1 is out of sync. */
{
- mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
+ mfn_t gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
struct page_info *pg = mfn_to_page(gl1mfn);
if ( mfn_valid(gl1mfn)
&& page_is_out_of_sync(pg) )
}
sl1mfn = shadow_l2e_get_mfn(sl2e);
- gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
+ gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
pg = mfn_to_page(gl1mfn);
if ( likely(sh_mfn_is_a_page_table(gl1mfn)
/* Need to repin the old toplevel shadow if it's been unpinned
* by shadow_prealloc(): in PV mode we're still running on this
* shadow and it's not safe to free it yet. */
- if ( !mfn_to_shadow_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
+ if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
{
SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn));
domain_crash(v->domain);
{
int r;
shadow_l1e_t *sl1p, sl1e;
- struct shadow_page_info *sp;
+ struct page_info *sp;
ASSERT(mfn_valid(gmfn));
ASSERT(mfn_valid(smfn));
- sp = mfn_to_shadow_page(smfn);
+ sp = mfn_to_page(smfn);
if ( sp->count_info != 0
|| (sp->u.sh.type != SH_type_l1_shadow
void sh_clear_shadow_entry(struct vcpu *v, void *ep, mfn_t smfn)
/* Blank out a single shadow entry */
{
- switch ( mfn_to_shadow_page(smfn)->u.sh.type )
+ switch ( mfn_to_page(smfn)->u.sh.type )
{
case SH_type_l1_shadow:
(void) shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
&& (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
{
(void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
- if ( mfn_to_shadow_page(sl1mfn)->u.sh.type == 0 )
+ if ( mfn_to_page(sl1mfn)->u.sh.type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
}
&& (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
{
(void) shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
- if ( mfn_to_shadow_page(sl2mfn)->u.sh.type == 0 )
+ if ( mfn_to_page(sl2mfn)->u.sh.type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
}
&& (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
{
(void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
- if ( mfn_to_shadow_page(sl3mfn)->u.sh.type == 0 )
+ if ( mfn_to_page(sl3mfn)->u.sh.type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
}
int done = 0;
/* Follow the backpointer */
- gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
+ gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Out-of-sync l1 shadows can contain anything: just check the OOS hash */
int done = 0;
/* Follow the backpointer */
- gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->v.sh.back);
+ gl2mfn = _mfn(mfn_to_page(sl2mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */
int done = 0;
/* Follow the backpointer */
- gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->v.sh.back);
+ gl3mfn = _mfn(mfn_to_page(sl3mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */
int done = 0;
/* Follow the backpointer */
- gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->v.sh.back);
+ gl4mfn = _mfn(mfn_to_page(sl4mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */